From: iap10@tetris.cl.cam.ac.uk
Date: Fri, 2 Apr 2004 13:58:27 +0000 (+0000)
Subject: bitkeeper revision 1.833 (406d7183Ckz-sf5jTa2XrtbKezR-dQ)
X-Git-Tag: archive/raspbian/4.8.0-1+rpi1~1^2~18288

bitkeeper revision 1.833 (406d7183Ckz-sf5jTa2XrtbKezR-dQ)

shadow cleanup
---

diff --git a/xen/common/memory.c b/xen/common/memory.c
index 01d3aeb181..17a0b71a8b 100644
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -1128,21 +1128,11 @@ int do_update_va_mapping(unsigned long page_nr,

     if ( unlikely(p->mm.shadow_mode) )
     {
-        unsigned long sval = 0;
+        unsigned long sval;

-        // XXX this only works for l1 entries, with no translation
+        l1pte_no_fault( &current->mm, &val, &sval );

-        if ( (val & _PAGE_PRESENT) && (val & _PAGE_ACCESSED) )
-        {
-            sval = val;
-            if ( !(val & _PAGE_DIRTY) )
-                sval &= ~_PAGE_RW;
-        }
-
-        /* printk("update_va_map: page_nr=%08lx val =%08lx sval =%08lx\n",
-           page_nr, val, sval);*/
-
-        if ( __put_user( sval, ((unsigned long *) (&shadow_linear_pg_table[page_nr])) ) )
+        if ( unlikely(__put_user( sval, ((unsigned long *) (&shadow_linear_pg_table[page_nr])) ) ) )
         {
             // Since L2's are guranteed RW, failure indicates the page
             // was not shadowed, so ignore.
diff --git a/xen/common/shadow.c b/xen/common/shadow.c
index 8704151799..8e7b53db86 100644
--- a/xen/common/shadow.c
+++ b/xen/common/shadow.c
@@ -1,4 +1,4 @@
-/* -*- Mode:C++; c-set-style:BSD; c-basic-offset:4; tab-width:4 -*- */
+/* -*- Mode:C++; c-file-style:BSD; c-basic-offset:4; tab-width:4 -*- */

 #include
 #include
@@ -27,9 +27,9 @@ hypercall lock anyhow (at least initially).
 ********/

 static inline void free_shadow_page( struct mm_struct *m,
-                                     struct pfn_info *pfn_info )
+                                     struct pfn_info *pfn_info )
 {
-    unsigned long flags;
+    unsigned long flags;
     unsigned long type = pfn_info->type_and_flags & PGT_type_mask;

     m->shadow_page_count--;
@@ -289,8 +289,8 @@ static void shadow_mode_table_op( struct task_struct *p, unsigned int op )
     case DOM0_SHADOW_CONTROL_OP_CLEAN:
         __scan_shadow_table( m, op );

-        if( m->shadow_dirty_bitmap )
-            memset(m->shadow_dirty_bitmap,0,m->shadow_dirty_bitmap_size/8);
+        // we used to bzero dirty bitmap here, but now leave this to user space
+        // if we were double buffering we'd do the flip here
         break;
     }

@@ -355,6 +355,25 @@ static inline struct pfn_info *alloc_shadow_page( struct mm_struct *m )

 /************************************************************************/

+static inline void mark_dirty( struct mm_struct *m, unsigned int mfn )
+{
+    unsigned int pfn = machine_to_phys_mapping[mfn];
+    ASSERT(m->shadow_dirty_bitmap);
+    if( likely(pfn<m->shadow_dirty_bitmap_size) )
+    {
+        // XXX use setbit
+        m->shadow_dirty_bitmap[pfn/(sizeof(int)*8)] |=
+            (1<<(pfn%(sizeof(int)*8)));
+    }
+    else
+    {
+        printk("XXXX mark dirty overflow!");
+    }
+
+}
+
+/************************************************************************/
+
 static inline void l1pte_write_fault( struct mm_struct *m,
                                       unsigned long *gpte_p, unsigned long *spte_p )
 {
@@ -370,6 +389,10 @@ static inline void l1pte_write_fault( struct mm_struct *m,
         break;

     case SHM_logdirty:
+        spte = gpte;
+        gpte |= _PAGE_DIRTY | _PAGE_ACCESSED;
+        spte |= _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED;
+        mark_dirty( m, gpte >> PAGE_SHIFT );
         break;
     }

@@ -394,6 +417,10 @@ static inline void l1pte_read_fault( struct mm_struct *m,
         break;

     case SHM_logdirty:
+        spte = gpte;
+        gpte |= _PAGE_ACCESSED;
+        spte |= _PAGE_ACCESSED;
+        spte &= ~ _PAGE_RW;
         break;
     }

@@ -414,12 +441,21 @@ static inline void l1pte_no_fault( struct mm_struct *m,
         if ( (gpte & (_PAGE_PRESENT|_PAGE_ACCESSED) ) ==
              (_PAGE_PRESENT|_PAGE_ACCESSED) )
         {
+            spte = gpte;
             if ( ! (gpte & _PAGE_DIRTY ) )
                 spte &= ~ _PAGE_RW;
         }
         break;

     case SHM_logdirty:
+        spte = 0;
+        if ( (gpte & (_PAGE_PRESENT|_PAGE_ACCESSED) ) ==
+             (_PAGE_PRESENT|_PAGE_ACCESSED) )
+        {
+            spte = gpte;
+            spte &= ~ _PAGE_RW;
+        }
+
         break;
     }

@@ -427,6 +463,33 @@ static inline void l1pte_no_fault( struct mm_struct *m,
     *spte_p = spte;
 }

+static inline void l2pde_general( struct mm_struct *m,
+                                  unsigned long *gpde_p, unsigned long *spde_p,
+                                  unsigned long sl1pfn)
+{
+    unsigned long gpde = *gpde_p;
+    unsigned long spde = *spde_p;
+
+    spde = 0;
+
+    if ( sl1pfn )
+    {
+        spde = (gpde & ~PAGE_MASK) | (sl1pfn<<PAGE_SHIFT) |
+            _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY;
+        gpde = gpde | _PAGE_ACCESSED | _PAGE_DIRTY;
+    }
+
+    *gpde_p = gpde;
+    *spde_p = spde;
+}

     delete_shadow_status( &p->mm, gpfn );

-#if 0 // XXX leave as might be useful for later debugging
-    {
-        int i;
-        unsigned long * spl1e = map_domain_mem( spfn<<PAGE_SHIFT );

     free_shadow_page( &p->mm, &frame_table[spfn] );
 }
@@ -526,21 +576,7 @@ unsigned long shadow_l2_table(
         unsigned long s_sh = __shadow_status(p, gpte>>PAGE_SHIFT);

-        if( s_sh & PSH_shadowed ) // PSH_shadowed
-        {
-            if ( unlikely( (__shadow_status(p, gpte>>PAGE_SHIFT) & PGT_type_mask) == PGT_l2_page_table) )
-            {
-                printk("Linear mapping detected\n");
-                spte = gpte & ~_PAGE_RW;
-            }
-            else
-            {
-                spte = ( gpte & ~PAGE_MASK ) | (s_sh<<PAGE_SHIFT) |
-                    _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY;
-            }
-        }

         set_shadow_status( &current->mm, gl1pfn, PSH_shadowed | sl1pfn );

-        gpde = gpde | _PAGE_ACCESSED | _PAGE_DIRTY;
-        spde = (gpde & ~PAGE_MASK) | _PAGE_RW | (sl1pfn<<PAGE_SHIFT);
+        l2pde_general( &current->mm, &gpde, &spde, sl1pfn );

         linear_l2_table[va>>L2_PAGETABLE_SHIFT] = mk_l2_pgentry(gpde);
         shadow_linear_l2_table[va>>L2_PAGETABLE_SHIFT] = mk_l2_pgentry(spde);
@@ -690,16 +724,7 @@ int shadow_fault( unsigned long va, long error_code )

         SH_VVLOG("4b: was shadowed, l2 missing ( %08lx )",sl1pfn);

-        spde = (gpde & ~PAGE_MASK) | (sl1pfn<<PAGE_SHIFT) |
-            _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY;
-        gpde = gpde | _PAGE_ACCESSED | _PAGE_DIRTY;
+        l2pde_general( &current->mm, &gpde, &spde, sl1pfn );

         linear_l2_table[va>>L2_PAGETABLE_SHIFT] = mk_l2_pgentry(gpde);
         shadow_linear_l2_table[va>>L2_PAGETABLE_SHIFT] = mk_l2_pgentry(spde);
@@ -781,18 +806,8 @@ void shadow_l2_normal_pt_update( unsigned long pa, unsigned long gpte )

     sp2le = (l2_pgentry_t *) map_domain_mem( spfn << PAGE_SHIFT );
     // no real need for a cache here
-
-    if ( s_sh ) // PSH_shadowed
-    {
-        if ( unlikely( (frame_table[gpte>>PAGE_SHIFT].type_and_flags & PGT_type_mask) == PGT_l2_page_table) )
-        {
-            // linear page table case
-            spte = (gpte & ~_PAGE_RW) | _PAGE_DIRTY | _PAGE_ACCESSED;
-        }
-        else
-            spte = (gpte & ~PAGE_MASK) | (s_sh<<PAGE_SHIFT) | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED;
-    }
-    else
-        spte = 0;
+
+    l2pde_general( &current->mm, &gpte, &spte, s_sh );

     // XXXX Should mark guest pte as DIRTY and ACCESSED too!!!!!
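
A standalone sketch of the log-dirty propagation this patch introduces, for readers
without the rest of the shadow code. It is an illustration only: the _PAGE_* values,
the fixed MAX_PFNS bitmap size, and indexing the bitmap directly by frame number are
simplified assumptions; the patch's mark_dirty() first translates the machine frame
through machine_to_phys_mapping, and the l1pte_*_logdirty names below stand in for
the SHM_logdirty branches of the real l1pte_write_fault()/l1pte_no_fault().

#include <stdio.h>

#define _PAGE_PRESENT  0x001UL
#define _PAGE_RW       0x002UL
#define _PAGE_ACCESSED 0x020UL
#define _PAGE_DIRTY    0x040UL
#define PAGE_SHIFT     12

#define MAX_PFNS 1024   /* assumed guest size, for the demo only */

static unsigned int dirty_bitmap[MAX_PFNS / (sizeof(unsigned int) * 8)];

/* Set the bit for frame 'pfn' (cf. mark_dirty(); the real version maps the
 * machine frame through machine_to_phys_mapping first). */
static void mark_dirty(unsigned int pfn)
{
    if (pfn < MAX_PFNS)
        dirty_bitmap[pfn / (sizeof(unsigned int) * 8)] |=
            1u << (pfn % (sizeof(unsigned int) * 8));
    else
        printf("mark dirty overflow!\n");
}

/* Write fault under SHM_logdirty: log the frame, then give the shadow PTE
 * RW/DIRTY/ACCESSED so the guest's write can complete. */
static void l1pte_write_fault_logdirty(unsigned long *gpte_p, unsigned long *spte_p)
{
    unsigned long gpte = *gpte_p, spte = gpte;

    gpte |= _PAGE_DIRTY | _PAGE_ACCESSED;
    spte |= _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED;
    mark_dirty(gpte >> PAGE_SHIFT);

    *gpte_p = gpte;
    *spte_p = spte;
}

/* Non-faulting propagation under SHM_logdirty: copy the guest PTE but strip
 * RW, so the first write faults and is logged by the function above. */
static void l1pte_no_fault_logdirty(unsigned long *gpte_p, unsigned long *spte_p)
{
    unsigned long gpte = *gpte_p, spte = 0;

    if ((gpte & (_PAGE_PRESENT | _PAGE_ACCESSED)) ==
        (_PAGE_PRESENT | _PAGE_ACCESSED))
        spte = gpte & ~_PAGE_RW;

    *gpte_p = gpte;
    *spte_p = spte;
}

int main(void)
{
    /* Guest PTE for frame 5: present, writable, accessed. */
    unsigned long gpte = (5UL << PAGE_SHIFT) | _PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED;
    unsigned long spte;

    l1pte_no_fault_logdirty(&gpte, &spte);
    printf("no-fault:    spte=%#lx (RW stripped)\n", spte);

    l1pte_write_fault_logdirty(&gpte, &spte);
    printf("write-fault: spte=%#lx, frame 5 dirty=%u\n",
           spte, (dirty_bitmap[0] >> 5) & 1u);
    return 0;
}

The point of the split is that non-faulting propagation installs shadow entries with
RW stripped, so a domain's first write to any page faults into the write-fault path,
which records the frame in the dirty bitmap; DOM0_SHADOW_CONTROL_OP_CLEAN then hands
that bitmap to user space instead of clearing it in the hypervisor, as the patch's
comment notes.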